#define p2m_pod_offline_or_broken_replace(pg) BUG_ON(pg != NULL)
#endif
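+/* Not every architecture defines PGC_reserved; fall back to a no-op value. */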
+#ifndef PGC_reserved
+#define PGC_reserved 0
+#endif
+
/*
 * Comma-separated list of hexadecimal page numbers containing bad bytes.
 * e.g. 'badpage=0x3f45,0x8a321'.
    for ( i = 0; i < nr; i++ )
    {
-        ASSERT(!(pg[i].count_info & ~PGC_extra));
+        ASSERT(!(pg[i].count_info & ~(PGC_extra | PGC_reserved)));
        if ( pg[i].count_info & PGC_extra )
            extra_pages++;
    }
        page_set_owner(&pg[i], d);
        smp_wmb(); /* Domain pointer must be visible before updating refcnt. */
        pg[i].count_info =
-            (pg[i].count_info & PGC_extra) | PGC_allocated | 1;
+            (pg[i].count_info & (PGC_extra | PGC_reserved)) | PGC_allocated | 1;
+
        page_list_add_tail(&pg[i], page_to_list(d, &pg[i]));
    }
            /* TODO: asynchronous scrubbing for pages of static memory. */
            scrub_one_page(pg);
        }
+
+        /* When initialising a page of static memory, mark it PGC_reserved. */
+        pg[i].count_info |= PGC_reserved;
    }
}
+
+/*
+ * Acquire nr_mfns contiguous reserved pages of static memory, starting
+ * at #smfn.
+ * This function needs to be reworked if used outside of boot.
+ */
+static struct page_info * __init acquire_staticmem_pages(mfn_t smfn,
+                                                          unsigned long nr_mfns,
+                                                          unsigned int memflags)
+{
+    bool need_tlbflush = false;
+    uint32_t tlbflush_timestamp = 0;
+    unsigned long i;
+    struct page_info *pg;
+
+    ASSERT(nr_mfns);
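+    /* All MFNs in the range must be backed by a struct page_info. */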
+    for ( i = 0; i < nr_mfns; i++ )
+        if ( !mfn_valid(mfn_add(smfn, i)) )
+            return NULL;
+
+    pg = mfn_to_page(smfn);
+
+    spin_lock(&heap_lock);
+
+    for ( i = 0; i < nr_mfns; i++ )
+    {
+        /* The page should be reserved and not yet allocated. */
+        if ( pg[i].count_info != (PGC_state_free | PGC_reserved) )
+        {
+            printk(XENLOG_ERR
+                   "pg[%lu] Static MFN %"PRI_mfn" c=%#lx t=%#x\n",
+                   i, mfn_x(smfn) + i,
+                   pg[i].count_info, pg[i].tlbflush_timestamp);
+            goto out_err;
+        }
+
+        if ( !(memflags & MEMF_no_tlbflush) )
+            accumulate_tlbflush(&need_tlbflush, &pg[i],
+                                &tlbflush_timestamp);
+
+        /*
+         * Preserve the PGC_reserved flag and change the page state
+         * to PGC_state_inuse.
+         */
+        pg[i].count_info = PGC_reserved | PGC_state_inuse;
+        /* Initialise fields which have other uses for free pages. */
+        pg[i].u.inuse.type_info = 0;
+        page_set_owner(&pg[i], NULL);
+    }
+
+    spin_unlock(&heap_lock);
+
+    if ( need_tlbflush )
+        filtered_flush_tlb_mask(tlbflush_timestamp);
+
+    /*
+     * Ensure cache and RAM are consistent for platforms where the guest
+     * can control its own visibility of/through the cache.
+     */
+    for ( i = 0; i < nr_mfns; i++ )
+        flush_page_to_ram(mfn_x(smfn) + i, !(memflags & MEMF_no_icache_flush));
+
+    return pg;
+
+ out_err:
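+    /* Roll back pages already switched to the in-use state. */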
+    while ( i-- )
+        pg[i].count_info = PGC_reserved | PGC_state_free;
+
+    spin_unlock(&heap_lock);
+
+    return NULL;
+}
+
+/*
+ * Acquire nr_mfns contiguous pages of static memory, starting at #smfn,
+ * and assign them to the specified domain #d.
+ */
+int __init acquire_domstatic_pages(struct domain *d, mfn_t smfn,
+                                   unsigned long nr_mfns, unsigned int memflags)
+{
+    struct page_info *pg;
+
+    ASSERT(!in_irq());
+
+    pg = acquire_staticmem_pages(smfn, nr_mfns, memflags);
+    if ( !pg )
+        return -ENOENT;
+
+    if ( !d || (memflags & (MEMF_no_owner | MEMF_no_refcount)) )
+    {
+        /*
+         * Handling of these cases is omitted here because, for now,
+         * acquired static memory is only used as guest RAM.
+         */
+        ASSERT_UNREACHABLE();
+        return -EINVAL;
+    }
+
+    if ( assign_pages(pg, nr_mfns, d, memflags) )
+    {
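+        /* Assignment failed: return the pages to the reserved free state. */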
+        free_staticmem_pages(pg, nr_mfns, memflags & MEMF_no_scrub);
+        return -EINVAL;
+    }
+
+    return 0;
+}
#endif
/*